Fix XEN_SYSCTL_physinfo to handle NUMA info properly.
author     Keir Fraser <keir.fraser@citrix.com>
Wed, 14 May 2008 08:52:25 +0000 (09:52 +0100)
committer  Keir Fraser <keir.fraser@citrix.com>
Wed, 14 May 2008 08:52:25 +0000 (09:52 +0100)
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/arch/ia64/xen/dom0_ops.c
xen/arch/x86/sysctl.c
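
Both hunks make the same fix to arch_do_sysctl(): XEN_SYSCTL_physinfo is an in/out operation, where the caller passes in max_cpu_id (the capacity of its array) and the cpu_to_node guest handle, and the hypervisor fills in the topology data. Previously the handler memset() the whole physinfo structure before reading those inputs, so max_cpu_id was read back as zero and the guest handle was wiped, meaning no NUMA information was ever copied out and the structure returned to the caller contained a null handle. The patch latches both inputs into locals before the memset, restores the handle afterwards, and (in the x86 version) also stops a failed copy_to_guest_offset() inside the loop from being masked by the final copy_to_guest(). As a rough standalone illustration of the save-before-memset pattern (plain C with invented names, not Xen code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* In/out request, loosely modelled on xen_sysctl_physinfo_t: the caller
     * fills max_cpu_id and cpu_to_node; the handler fills everything else. */
    struct physinfo_req {
        uint32_t threads_per_core;    /* OUT */
        uint32_t max_cpu_id;          /* IN: array capacity, OUT: last CPU id */
        uint32_t *cpu_to_node;        /* IN: caller-supplied array, may be NULL */
    };

    static int fill_physinfo(struct physinfo_req *pi, uint32_t last_cpu_id)
    {
        /* Latch the IN fields before clearing the structure -- the ordering
         * the patch introduces; reading them after the memset() is the bug. */
        uint32_t max_array_ent = pi->max_cpu_id;
        uint32_t *cpu_to_node_arr = pi->cpu_to_node;

        memset(pi, 0, sizeof(*pi));
        pi->cpu_to_node = cpu_to_node_arr;   /* restore the caller's handle */
        pi->threads_per_core = 1;            /* placeholder topology values */
        pi->max_cpu_id = last_cpu_id;
        if (max_array_ent > pi->max_cpu_id)
            max_array_ent = pi->max_cpu_id;

        if (cpu_to_node_arr != NULL)
            for (uint32_t i = 0; i <= max_array_ent; i++)
                cpu_to_node_arr[i] = 0;      /* pretend everything is node 0 */

        return 0;
    }

    int main(void)
    {
        uint32_t nodes[4];
        struct physinfo_req req = { .max_cpu_id = 3, .cpu_to_node = nodes };

        fill_physinfo(&req, 3);
        printf("max_cpu_id=%u cpu0->node%u\n", req.max_cpu_id, nodes[0]);
        return 0;
    }

As in the patch, the loop bound is the minimum of the caller's capacity and the last online CPU, so a short caller buffer is never overrun.
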

diff --git a/xen/arch/ia64/xen/dom0_ops.c b/xen/arch/ia64/xen/dom0_ops.c
index af4e6b555f732b614212c1e28ff49a6f556bc5d3..4d00c68583817c56bd81d7576a8ce95d4e5afdb5 100644
--- a/xen/arch/ia64/xen/dom0_ops.c
+++ b/xen/arch/ia64/xen/dom0_ops.c
@@ -407,10 +407,15 @@ long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
     {
         int i;
         uint32_t max_array_ent;
+        XEN_GUEST_HANDLE_64(uint32) cpu_to_node_arr;
 
         xen_sysctl_physinfo_t *pi = &op->u.physinfo;
 
+        max_array_ent = pi->max_cpu_id;
+        cpu_to_node_arr = pi->cpu_to_node;
+
         memset(pi, 0, sizeof(*pi));
+        pi->cpu_to_node = cpu_to_node_arr;
         pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
             cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
@@ -421,16 +426,15 @@ long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
         pi->scrub_pages      = avail_scrub_pages();
         pi->cpu_khz          = local_cpu_data->proc_freq / 1000;
 
-        max_array_ent = pi->max_cpu_id;
         pi->max_cpu_id = last_cpu(cpu_online_map);
         max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
 
         ret = 0;
 
-        if (!guest_handle_is_null(pi->cpu_to_node)) {
+        if (!guest_handle_is_null(cpu_to_node_arr)) {
             for (i = 0; i <= max_array_ent; i++) {
                 uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
-                if (copy_to_guest_offset(pi->cpu_to_node, i, &node, 1)) {
+                if (copy_to_guest_offset(cpu_to_node_arr, i, &node, 1)) {
                     ret = -EFAULT;
                     break;
                 }
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index 13f177a7fac174b80209e70a3763319466e2abb1..2fc125d4b9924494d6e75ca93bf8954002d947a5 100644
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -40,6 +40,7 @@ long arch_do_sysctl(
     case XEN_SYSCTL_physinfo:
     {
         uint32_t i, max_array_ent;
+        XEN_GUEST_HANDLE_64(uint32) cpu_to_node_arr;
 
         xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
 
@@ -47,7 +48,11 @@ long arch_do_sysctl(
         if ( ret )
             break;
 
+        max_array_ent = pi->max_cpu_id;
+        cpu_to_node_arr = pi->cpu_to_node;
+
         memset(pi, 0, sizeof(*pi));
+        pi->cpu_to_node = cpu_to_node_arr;
         pi->threads_per_core =
             cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
@@ -64,22 +69,26 @@ long arch_do_sysctl(
         if ( iommu_enabled )
             pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
 
-        max_array_ent = pi->max_cpu_id;
         pi->max_cpu_id = last_cpu(cpu_online_map);
         max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
 
-        ret = -EFAULT;
-        if ( !guest_handle_is_null(pi->cpu_to_node) )
+        ret = 0;
+
+        if ( !guest_handle_is_null(cpu_to_node_arr) )
         {
             for ( i = 0; i <= max_array_ent; i++ )
             {
                 uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
-                if ( copy_to_guest_offset(pi->cpu_to_node, i, &node, 1) )
+                if ( copy_to_guest_offset(cpu_to_node_arr, i, &node, 1) )
+                {
+                    ret = -EFAULT;
                     break;
+                }
             }
         }
 
-        ret = copy_to_guest(u_sysctl, sysctl, 1) ? -EFAULT : 0;
+        if ( copy_to_guest(u_sysctl, sysctl, 1) )
+            ret = -EFAULT;
     }
     break;
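
For context, the consumer side of the interface works roughly like this: the toolstack points cpu_to_node at a buffer, sets max_cpu_id to the last index that buffer can hold, and after the call reads back the clamped max_cpu_id and the per-CPU node IDs, with ~0u marking a CPU that was not online. The sketch below fakes the hypercall entirely (fake_physinfo() stands in for issuing XEN_SYSCTL_physinfo through the real toolstack plumbing); only the calling convention is the point.

    #include <stdint.h>
    #include <stdio.h>

    #define CPU_OFFLINE_NODE (~0u)   /* matches the ~0u the handler stores */
    #define MAX_CPUS 8

    /* Stand-in for the hypercall: a real caller would issue
     * XEN_SYSCTL_physinfo with cpu_to_node pointing at its buffer. */
    static uint32_t fake_physinfo(uint32_t *cpu_to_node, uint32_t max_array_ent)
    {
        uint32_t max_cpu_id = 3;                  /* pretend CPUs 0-3 exist */
        if (max_array_ent > max_cpu_id)
            max_array_ent = max_cpu_id;
        for (uint32_t i = 0; i <= max_array_ent; i++)
            cpu_to_node[i] = (i < 2) ? 0 : 1;     /* two fake NUMA nodes */
        return max_cpu_id;
    }

    int main(void)
    {
        uint32_t cpu_to_node[MAX_CPUS];
        uint32_t max_cpu_id = fake_physinfo(cpu_to_node, MAX_CPUS - 1);

        for (uint32_t i = 0; i <= max_cpu_id && i < MAX_CPUS; i++) {
            if (cpu_to_node[i] == CPU_OFFLINE_NODE)
                printf("cpu%u: offline\n", i);
            else
                printf("cpu%u: node %u\n", i, cpu_to_node[i]);
        }
        return 0;
    }
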